n <- 50 # number of trajectories per noise level
noises <- seq(0.2, 3, 0.2) # noise levels to simulate, from 0.2 to 3
freq <- 1 # compute sinus every 1 time unit
# Phase shifted
# Simulate n phase-shifted sinusoid trajectories per noise level
ps <- multi_sims(type = "ps", noises = noises, n = n, freq = freq)
ps
## Time variable value noise
## 1: 0 V1 0.000000000 0
## 2: 1 V1 0.841470985 0
## 3: 2 V1 0.909297427 0
## 4: 3 V1 0.141120008 0
## 5: 4 V1 -0.756802495 0
## ---
## 79996: 95 V50 -0.906374389 3
## 79997: 96 V50 -0.845216967 3
## 79998: 97 V50 -0.006970963 3
## 79999: 98 V50 0.837684112 3
## 80000: 99 V50 0.912176278 3
# Phase shifted with trend
pst <- multi_sims(type = "pst", noises = noises, n = n, slope = 0.1, freq = freq)
# Amplitude noise
na <- multi_sims(type = "na", noises = noises, n = n, freq = freq)
# Visual check of the three simulation types
plot_sim(ps)
plot_sim(pst)
plot_sim(na)
For each condition, this computes the distance between the average trajectory and each individual trajectory.
# Column names of the simulated data, reused by all helper calls below
cond <- "noise"
ti <- "Time"
mea <- "value"
lab <- "variable"
# Euclidean distance of every trajectory to its condition-mean trajectory.
# Use FALSE, not the reassignable shortcut F
ps_mean <- dist_mean(data = ps, condition = cond, tcol = ti, measure = mea, label = lab, return.mean = FALSE)
ps_mean
## noise variable euclid_to_mean
## 1: 0 V1 0.000000
## 2: 0 V2 0.000000
## 3: 0 V3 0.000000
## 4: 0 V4 0.000000
## 5: 0 V5 0.000000
## ---
## 796: 3 V46 8.259711
## 797: 3 V47 6.168910
## 798: 3 V48 6.083081
## 799: 3 V49 8.237276
## 800: 3 V50 8.206210
# Same distance-to-mean computation for the trended and amplitude-noise
# simulations; FALSE instead of the reassignable shortcut F
pst_mean <- dist_mean(data = pst, condition = cond, tcol = ti, measure = mea, label = lab, return.mean = FALSE)
na_mean <- dist_mean(data = na, condition = cond, tcol = ti, measure = mea, label = lab, return.mean = FALSE)
For each condition, take each pair of trajectories and compute the correlations between them (Pearson, Spearman and Kendall). In addition it clips the trajectories by comparison with their rolling means: whenever the trajectory is above its rolling mean, set it to 1, otherwise to 0. Finally, for each pair of trajectories, the “overlap” metric represents how often the trajectories of the pair have the same value (i.e. 0.5 if completely random, 1 if they fully overlap).
This takes quite a while (partially) because it is implemented with for loops, but is still performed in reasonable time (i.e. a few minutes)
# Pairwise stats (Overlap + Pearson/Spearman/Kendall) between all trajectory
# pairs, per noise level; k_roll_mean is the rolling-mean window used for clipping
ps_pw <- all_pairwise_stats(data = ps, condition = cond, label = lab, measure = mea, k_roll_mean = 5)
ps_pw
## noise Label1 Label2 Overlap Pearson Spearman Kendall
## 1: 0 V1 V2 1.00 1.0000000 1.0000000 1.0000000
## 2: 0 V1 V3 1.00 1.0000000 1.0000000 1.0000000
## 3: 0 V1 V4 1.00 1.0000000 1.0000000 1.0000000
## 4: 0 V1 V5 1.00 1.0000000 1.0000000 1.0000000
## 5: 0 V1 V6 1.00 1.0000000 1.0000000 1.0000000
## ---
## 19596: 3 V47 V49 0.16 -0.9138289 -0.8993699 -0.7305051
## 19597: 3 V47 V50 0.10 -0.9517584 -0.9514071 -0.8161616
## 19598: 3 V48 V49 0.22 -0.7556510 -0.7351935 -0.5426263
## 19599: 3 V48 V50 0.28 -0.6818917 -0.6389319 -0.4569697
## 19600: 3 V49 V50 0.94 0.9943553 0.9890429 0.9143434
# Same pairwise statistics for the trended and amplitude-noise simulations
pst_pw <- all_pairwise_stats(data = pst, condition = cond, label = lab, measure = mea, k_roll_mean = 5)
na_pw <- all_pairwise_stats(data = na, condition = cond, label = lab, measure = mea, k_roll_mean = 5)
# Correlation between the four pairwise metrics; select the metric columns
# by name rather than by fragile positional index (was ps_pw[, 4:7])
cor(ps_pw[, .(Overlap, Pearson, Spearman, Kendall)])
## Overlap Pearson Spearman Kendall
## Overlap 1.0000000 0.9888408 0.9905584 0.9984593
## Pearson 0.9888408 1.0000000 0.9997611 0.9901594
## Spearman 0.9905584 0.9997611 1.0000000 0.9919188
## Kendall 0.9984593 0.9901594 0.9919188 1.0000000
# Metric inter-correlations for the trended simulations; columns selected
# by name rather than by fragile positional index (was pst_pw[, 4:7])
cor(pst_pw[, .(Overlap, Pearson, Spearman, Kendall)])
## Overlap Pearson Spearman Kendall
## Overlap 1.0000000 0.9889862 0.9892888 0.9856745
## Pearson 0.9889862 1.0000000 0.9998218 0.9726862
## Spearman 0.9892888 0.9998218 1.0000000 0.9738838
## Kendall 0.9856745 0.9726862 0.9738838 1.0000000
# Metric inter-correlations for the amplitude-noise simulations; columns
# selected by name rather than by fragile positional index (was na_pw[, 4:7])
cor(na_pw[, .(Overlap, Pearson, Spearman, Kendall)])
## Overlap Pearson Spearman Kendall
## Overlap 1.0000000 0.9520328 0.9524732 0.9666527
## Pearson 0.9520328 1.0000000 0.9964614 0.9807784
## Spearman 0.9524732 0.9964614 1.0000000 0.9809059
## Kendall 0.9666527 0.9807784 0.9809059 1.0000000
For each condition, count the proportion of trajectories that lie more than x standard deviations from the mean (conversion to z-score first?).
Get a p-value: Assume that they are normally distributed, check that the distributions lies above 0 for correlations and above 0.5 for clipping (t-test).
# One-sample t-tests per noise level (noise == 0 excluded: all metrics are
# constant there, so the t-test variance is 0). Correlations are tested
# against mu = 0, Overlap against its chance level mu = 0.5
ps_pw[noise !=0 , .(p.val.pears = t.test(Pearson)$p.value,
p.val.spear = t.test(Spearman)$p.value,
p.val.kenda = t.test(Kendall)$p.value,
p.val.ovrlp = t.test(Overlap, mu = 0.5)$p.value), by = noise]
## noise p.val.pears p.val.spear p.val.kenda p.val.ovrlp
## 1: 0.2 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 2: 0.4 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 3: 0.6 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 4: 0.8 8.385208e-131 6.749832e-131 2.645778e-126 2.397417e-124
## 5: 1.0 5.440177e-85 2.134098e-85 2.507674e-83 2.238830e-82
## 6: 1.2 3.107927e-74 1.497903e-74 1.351757e-72 1.718361e-73
## 7: 1.4 9.004623e-86 2.306209e-86 1.653604e-85 8.234132e-90
## 8: 1.6 9.986831e-01 9.894211e-01 9.219714e-01 9.882245e-01
## 9: 1.8 1.009961e-07 1.020201e-07 1.145670e-07 3.769205e-07
## 10: 2.0 4.754487e-01 4.950908e-01 5.393231e-01 5.874802e-01
## 11: 2.2 8.430505e-01 8.402287e-01 9.167348e-01 9.418730e-01
## 12: 2.4 5.084064e-01 5.033137e-01 5.062325e-01 6.311104e-01
## 13: 2.6 7.889741e-01 8.062834e-01 7.759452e-01 8.100983e-01
## 14: 2.8 4.280022e-01 4.301364e-01 4.238552e-01 4.283233e-01
## 15: 3.0 7.128542e-01 7.331349e-01 8.010854e-01 8.454407e-01
# Same per-noise-level t-tests for the phase-shifted-with-trend simulations
pst_pw[noise !=0 , .(p.val.pears = t.test(Pearson)$p.value,
p.val.spear = t.test(Spearman)$p.value,
p.val.kenda = t.test(Kendall)$p.value,
p.val.ovrlp = t.test(Overlap, mu = 0.5)$p.value), by = noise]
## noise p.val.pears p.val.spear p.val.kenda p.val.ovrlp
## 1: 0.2 0 0 0 0.000000e+00
## 2: 0.4 0 0 0 0.000000e+00
## 3: 0.6 0 0 0 0.000000e+00
## 4: 0.8 0 0 0 4.092725e-132
## 5: 1.0 0 0 0 7.847687e-153
## 6: 1.2 0 0 0 4.718306e-21
## 7: 1.4 0 0 0 2.799214e-37
## 8: 1.6 0 0 0 8.024573e-04
## 9: 1.8 0 0 0 1.291937e-01
## 10: 2.0 0 0 0 2.515917e-02
## 11: 2.2 0 0 0 7.457545e-01
## 12: 2.4 0 0 0 4.644773e-01
## 13: 2.6 0 0 0 4.108491e-02
## 14: 2.8 0 0 0 3.546402e-01
## 15: 3.0 0 0 0 4.503814e-01
# Same per-noise-level t-tests for the amplitude-noise simulations
na_pw[noise !=0 , .(p.val.pears = t.test(Pearson)$p.value,
p.val.spear = t.test(Spearman)$p.value,
p.val.kenda = t.test(Kendall)$p.value,
p.val.ovrlp = t.test(Overlap, mu = 0.5)$p.value), by = noise]
## noise p.val.pears p.val.spear p.val.kenda p.val.ovrlp
## 1: 0.2 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 2: 0.4 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 3: 0.6 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 4: 0.8 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 5: 1.0 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
## 6: 1.2 0.000000e+00 0.000000e+00 0.000000e+00 1.305400e-282
## 7: 1.4 0.000000e+00 0.000000e+00 0.000000e+00 2.408321e-205
## 8: 1.6 0.000000e+00 0.000000e+00 0.000000e+00 1.705187e-136
## 9: 1.8 0.000000e+00 0.000000e+00 0.000000e+00 3.373701e-130
## 10: 2.0 4.635649e-227 4.416412e-217 4.842722e-216 1.285686e-53
## 11: 2.2 5.729161e-164 1.588502e-154 1.840027e-152 1.760740e-39
## 12: 2.4 1.443955e-150 2.726606e-137 3.168601e-136 1.281350e-48
## 13: 2.6 2.874841e-80 1.780331e-74 3.378267e-73 3.500463e-16
## 14: 2.8 1.287741e-70 4.664002e-61 7.360538e-60 8.853890e-12
## 15: 3.0 2.298239e-47 5.974382e-43 1.797761e-42 1.436581e-11
If we increase the sampling rate, it means that there are more points per oscillation. Visually this means that the average trajectory is smoother.
# Low sampling rate: one point every 2 time units.
# FALSE instead of the reassignable shortcut F
plot_sim(sim_phase_shifted(n = 50, noise = 0.4, freq = 2), use.facet = FALSE)
# High sampling rate: one point every 0.2 time unit (smoother average)
plot_sim(sim_phase_shifted(n = 50, noise = 0.4, freq = 0.2), use.facet = FALSE)
The role of the rolling mean in clipping, is to “cut the oscillation in 2”. A good case is like:
# Contains 6 to 7 points per oscillation
# Contains 6 to 7 points per oscillation
sim <- sim_phase_shifted(n = 1, noise = 0, freq = 1)
plot(sim$Time, sim$value, type = "b")
# Rolling mean (window of 6 points) in red
lines(x=sim$Time, y=rollex(sim$value, k = 6), col = "red", lwd = 1.5)
# clip trajectory and scale for clarity on the plot
clip_traj <- wrap_clip(x = sim$value, k = 6)
# Map the 0/1 clipped values to -1/1 so they are visible next to the signal
clip_traj <- ifelse(clip_traj == 0, -1, 1)
lines(x=sim$Time, y=clip_traj, col = "blue", type = "S")
It is a good case since the rolling mean (in red) cuts the trajectory (in black) at half-period (i.e. near to 0). This results in a clipped trajectory (in blue) that is up when the oscillation is “above 0” and down when it's “below 0”.
Now we multiply the sampling rate by 5, but keep the same window size for the moving average in the top case, and multiply it also by 5 in the bottom case:
par(mfrow = c(2,1))
# Contains more than 35 points per oscillation
sim <- sim_phase_shifted(n = 1, noise = 0, freq = 0.2)
# Same plot for two rolling-mean window sizes: 6 (top, too small for this
# sampling rate) and 35 (bottom, about one oscillation). The loop replaces
# two duplicated copies of the plotting code; plot order is unchanged
for (size_window in c(6, 35)) {
  plot(sim$Time, sim$value, type = "b")
  # Rolling mean in red
  lines(x = sim$Time, y = rollex(sim$value, k = size_window), col = "red", lwd = 1.5)
  # Clipped trajectory (mapped from 0/1 to -1/1 for visibility) in blue
  clip_traj <- wrap_clip(x = sim$value, k = size_window)
  clip_traj <- ifelse(clip_traj == 0, -1, 1)
  lines(x = sim$Time, y = clip_traj, col = "blue", type = "S")
}
As we can see the moving average follows the signal perfectly with the small window, whereas it is out of phase with the larger window with much smaller amplitude. Clipping trajectory is ok in both cases. Nevertheless the top case is bad, why? Don’t forget that we’ve been working with perfect sinusoid so far, let’s see what happens with a tiny bit of noise in the amplitude of the signal:
par(mfrow = c(2,1))
# Contains more than 35 points per oscillation
sim <- sim_noisy_amplitude(n = 1, noise = 0.1, freq = 0.2)
# Same two-window demo as above, but with a little amplitude noise.
# The loop replaces two duplicated copies of the plotting code
for (size_window in c(6, 35)) {
  plot(sim$Time, sim$value, type = "b")
  # Rolling mean in red
  lines(x = sim$Time, y = rollex(sim$value, k = size_window), col = "red", lwd = 1.5)
  # Clipped trajectory (mapped from 0/1 to -1/1 for visibility) in blue
  clip_traj <- wrap_clip(x = sim$value, k = size_window)
  clip_traj <- ifelse(clip_traj == 0, -1, 1)
  lines(x = sim$Time, y = clip_traj, col = "blue", type = "S")
}
As we observe, clipped trajectory becomes completely random with the small window, whereas it remains identical with the larger one. So we should choose the window size for the rolling mean such that it covers the number of points in an oscillation.
On multiple simulations the distributions look like:
# Already defined in section 4.
# na <- multi_sims(type = "na", noises = noises, n = n, freq = 1)
# na_pw <- all_pairwise_stats(data = na, condition = cond, label = lab, measure = mea, k_roll_mean = 5)
# q3 <- ggplot(na_pw, aes(x = noise, y = Overlap)) + geom_boxplot(aes(group = noise)) + ggtitle("Noisy Amplitude") + scale_y_continuous(limits = c(0,1))
# Higher sampling rate (one point per 0.2 time unit) but same clipping window (5)
na_high_freq <- multi_sims(type = "na", noises = noises, n = n, freq = 0.2)
na_high_freq_pw <- all_pairwise_stats(data = na_high_freq, condition = cond, label = lab, measure = mea, k_roll_mean = 5)
q3_high <- ggplot(na_high_freq_pw, aes(x = noise, y = Overlap)) + geom_boxplot(aes(group = noise)) + ggtitle("Window Size: 5; Freq: 0.2 (window too small)") + scale_y_continuous(limits = c(0,1))
# NOTE(review): q3 must already exist from the earlier section (commented above)
grid.arrange(q3 + ggtitle("Window Size: 5; Freq: 1 (window of the right size)"), q3_high, ncol = 2)
To assess this we define a new kind of simulation, made of “pulsed” exponential decays. The signal behaves as a regular exponential decay but once in a while, a stimulus brings the signal back to its maximum (i.e. 1). The noise in these simulations is a random lag between the time at which the stimulus is applied and the time at which the signal effectively goes up.
# edls: exponential decay with lagged stimulation; 5 time units between each pulse
# (here "noise" is the sd of the random lag between stimulus and response)
edls <- multi_sims(type = "edls", noises = noises, n = n, freq = 0.2, interval.stim = 5)
plot_sim(edls)
We can represent the stimuli pattern by another time series:
# Array of 0 of the same length as a trajectory
stim <- rep(0, 496)
# One stimulus every 25 samples (i.e. every 5 time units at freq = 0.2)
stim[seq(25,496,25)] <- 1
# Plot a trajectory without lag along with the stimulation pattern
plot(edls[variable=="V1" & noise==0, value], type = "l", lwd = 2)
lines(stim, type="s", col = "blue", lwd = 1)
Cross-correlation (or convolution; definitions seem to vary from one author to another, but it’s the same operation provided that we’ve flipped the stimulation pattern beforehand and have normalized the output; convolution is easier to interpret as a weighted average of one signal, weights being given by the other signal) computes an array of correlations of one signal by sliding another signal on it. The higher this correlation the more the curves look alike with the given sliding shift (lag).
# Cross-correlogram of a noise-free trajectory with the stimulation pattern
ccf(edls[variable=="V1" & noise==0, value], stim, ylab = "Correlation coefficient")
# Plain (lag-0) correlation, for comparison with the ccf peak
cor(edls[variable=="V1" & noise==0, value], stim)
## [1] -0.2198154
We see that the correlation without shift is actually negative whereas it is maximal if we shift the stimulation pattern by 1. The optimal lag is 1 and not 0 due to the way simulations were built. Provided that we sample the signal every 0.2 time unit, see edls <- multi_sims(type = "edls", noises = noises, n = n, freq = 0.2, interval.stim = 5), this would mean that the cells max response happens 0.2 time unit after the stimulus (once again here we should read 0, but it’s an artefact of the simulation building).
Here we’ll isolate the highest peak of cross-correlation between each trajectory and the stimulation pattern to essentially look at 2 things: the height of the peak which should inform about the strength of the association (the “causality” of the stimulation on the signal) and the associated lag (how long does it take for the cells to fully respond to the signal).
get.max.cc <- function(ts1, ts2, plot = FALSE, ...){
  # Return the maximum cross-correlation between ts1 and ts2 and the lag at
  # which it occurs.
  # plot: forwarded to ccf(); default changed from the reassignable shortcut
  #       F to FALSE (same value)
  # ...:  further arguments passed on to ccf() (e.g. lag.max)
  # Returns a named list(max.cc, lag); naming the elements lets callers use
  # $max.cc / $lag while positional use (e.g. data.table's := LHS) still works
  cc <- ccf(ts1, ts2, plot = plot, ...)
  max.indx <- which.max(cc$acf)
  return(list(max.cc = cc$acf[max.indx], lag = cc$lag[max.indx]))
}
# Work on a copy so edls itself is not modified by reference
cc.stim <- copy(edls)
# Peak cross-correlation with the stimulation pattern (and its lag) for
# every trajectory; FALSE instead of the reassignable shortcut F
cc.stim[, c("max.cc", "lag") := get.max.cc(value, stim, plot = FALSE), by = .(noise, variable)]
cc.stim[, noise := as.factor(noise)]
ggplot(cc.stim, aes(x = noise, y = max.cc)) + geom_boxplot(aes(group=noise))
ggplot(cc.stim, aes(x = noise, y = lag)) + geom_boxplot(aes(group=noise))
Note that in these simulations the lag is random for each pulse within a single trajectory, we might expect the cells to have a more robust response that will always provide “more or less” the same lag. In other words, in the simulations we add a mirrored normal noise centered around 0 to determine the lag and change the sd according to the noise, but we could also change the mean of the normal distribution.
Insensitive to input amplitude:
# Triple the signal amplitude: cross-correlation is normalized, so the peak
# and its lag should be unchanged. FALSE instead of the reassignable F
cc.stim[, value := value * 3]
cc.stim[, c("max.cc", "lag") := get.max.cc(value, stim, plot = FALSE), by = .(noise, variable)]
ggplot(cc.stim, aes(x = noise, y = max.cc)) + geom_boxplot(aes(group=noise))
ggplot(cc.stim, aes(x = noise, y = lag)) + geom_boxplot(aes(group=noise))
Exactly same results as before.
We perform simulations without lag in the response, but add white noise to the signal amplitude:
# Run the simulation without lag in response 15 times (14 + 1 internally computed)
# Run the simulation without lag in response 15 times (14 + 1 internally computed)
edls <- multi_sims(type = "edls", noises = rep(0, 14), n = n, freq = 0.2, interval.stim = 5)
# Add the white noise with different sd
nber.per.condition <- 496 * n # 496 data points per trajectory
# Define the levels of noise (one sd per data point, constant within a condition)
nois <- rep(seq(0, 0.56, 0.04), each = nber.per.condition)
edls$noise <- nois
# Produce the noise and add it. rnorm() recycles `sd` element-wise, so one
# vectorized call replaces the original sapply over single draws (the
# pointless `0 +` is dropped as well)
nois <- rnorm(length(nois), mean = 0, sd = nois)
edls[, value := value + nois]
plot_sim(edls)
Even though the mean trajectories look alike across noise levels, individual trajectories get completely denatured at the highest levels of noise. Remember that the signal varies between 0 and 1; for a noise level of 0.28, the probability to get a noise of 0.1 or more is: 0.3604924; it becomes 0.4291371 for a noise level of 0.56.
# Example trajectories at increasing white-noise levels; the dashed lines
# mark the noise-free signal bounds [0, 1]
par(mfrow = c(3,1))
plot(edls[variable == "V1" & noise == 0, value], type = "l"); abline(h=c(0,1), lty='dashed')
plot(edls[variable == "V1" & noise == 0.28, value], type = "l"); abline(h=c(0,1), lty='dashed')
plot(edls[variable == "V1" & noise == 0.56, value], type = "l"); abline(h=c(0,1), lty='dashed')
Effect on cross-correlation with the stimulation pattern:
# Cross-correlation with the stimulation pattern on the white-noise data;
# work on a copy so edls is not modified by reference, and use FALSE
# instead of the reassignable shortcut F
cc.stim <- copy(edls)
cc.stim[, c("max.cc", "lag") := get.max.cc(value, stim, plot = FALSE), by = .(noise, variable)]
cc.stim[, noise := as.factor(noise)]
ggplot(cc.stim, aes(x = noise, y = max.cc)) + geom_boxplot(aes(group=noise))
ggplot(cc.stim, aes(x = noise, y = lag)) + geom_boxplot(aes(group=noise))
We see that CC is much less sensitive to noise in amplitude than to noise in lag before response.
We introduce damped sinusoids simulations with noise in phase and a linear decrease.
# Damped sinusoid simulations; dampen_params = c(initial amplitude, decay rate)
# NOTE(review): the original comment said "psd: phase_shifted_damped" but the
# call uses type = "nad" — confirm which simulation type is intended
nad <- multi_sims(type = "nad", noises = noises, n = n, dampen_params = c(1, 0.01))
# Add linear trend (value decreases linearly by 0.5 over the 100 time points)
nad[, value := value + seq(0, -0.5, length.out = 100)]
# Moving average, computed per trajectory
nad[, moving_average := rollex(x = value, k = 6), by = c("noise", "variable")]
plot_sim(nad) + scale_y_continuous(limits = c(-1.5,1.5))
## Warning: Removed 25645 rows containing non-finite values (stat_summary).
## Warning: Removed 55 rows containing missing values (geom_path).
We want to try 2 different metrics for estimating the long-term decrease: the slope of a linear regression of the moving average, and the slope after maxima fitting:
# Slope of a linear regression of the moving average on time, per trajectory
# (lm() drops the NA padding at the ends of the moving average)
slope_ma <- nad[, .(slope = coef(lm(moving_average ~ Time))[2]), by = c("noise", "variable")]
ggplot(slope_ma, aes(x=as.factor(noise),y=slope)) + geom_boxplot(aes(fill=noise)) + theme(text = element_text(size = 45)) + ggtitle("Slopes from moving average")
detect.maxima <- function(x, window.size){
  # Flag positions of local maxima: out[i] is TRUE when x[i] is the maximum
  # of the centered window of `window.size` points around it.
  # Returns a logical vector of the same length as x, NA-padded at both ends
  # where the window does not fit. window.size must be odd.
  require(zoo)
  if(window.size %% 2 == 0) stop("window size must be odd")
  middle <- ceiling(window.size / 2)
  # (removed unused `xz <- as.zoo(x)`; rollapply works on x directly)
  out <- rollapply(x, window.size, function(w) which.max(w) == middle)
  # Pad so that out[i] lines up with x[i]
  out <- c(rep(NA, middle-1), out, rep(NA, middle-1))
  return(out)
}
detect.minima <- function(x, window.size){
  # Flag positions of local minima: out[i] is TRUE when x[i] is the minimum
  # of the centered window of `window.size` points around it.
  # Returns a logical vector of the same length as x, NA-padded at both ends
  # where the window does not fit. window.size must be odd.
  require(zoo)
  if(window.size %% 2 == 0) stop("window size must be odd")
  middle <- ceiling(window.size / 2)
  # (removed unused `xz <- as.zoo(x)`; rollapply works on x directly)
  out <- rollapply(x, window.size, function(w) which.min(w) == middle)
  # Pad so that out[i] lines up with x[i]
  out <- c(rep(NA, middle-1), out, rep(NA, middle-1))
  return(out)
}
wrap_max_reg <- function(traj, window.size, min.index, max.index, robust = FALSE){
  # Estimate the long-term slope of `traj` from a linear regression on its
  # local maxima (detected with a centered window of `window.size` points).
  # min.index, max.index delimit the region of interest where we are looking for maxima.
  # robust: if TRUE, fit with mblm (Theil-Sen) instead of ordinary lm.
  # Returns list(coeff = slope, p.val = p-value (2 when unavailable),
  #              nber.max = number of maxima used; numeric(0) if the fit
  #              fell back to the whole trajectory).
  # 0) detect.maxima() requires an odd window: round even sizes down by one
  #    (generalizes the original special cases 10 -> 9 and 20 -> 19)
  if(window.size %% 2 == 0){window.size <- window.size - 1}
  # 1) Where are the maxima
  max.pos <- which(detect.maxima(traj, window.size))
  # Keep only maxima occurring in [min.index, max.index]
  max.pos <- max.pos[max.pos >= min.index & max.pos <= max.index]
  nber <- length(max.pos)
  max.traj <- traj[max.pos]
  # 2) Perform the fitting
  # If the fit on maxima errors (e.g. no maximum detected), fall back to a
  # simple linear regression on the full trajectory (not robust, since the
  # robust fit is what failed)
  if(robust){
    require(mblm)
    fit <- try(mblm(max.traj ~ max.pos))
    if(inherits(fit, "try-error")){
      nber <- numeric(0)
      fit <- lm(traj ~ seq_along(traj))
    }
    # mblm's summary reports significance in a "Pr(>|V|)" column
    p.val <- try(coef(summary(fit))[2, "Pr(>|V|)"])
  } else {
    fit <- try(lm(max.traj ~ max.pos))
    if(inherits(fit, "try-error")){
      nber <- numeric(0)
      fit <- lm(traj ~ seq_along(traj))
    }
    p.val <- try(coef(summary(fit))[2, "Pr(>|t|)"])
  }
  # p-values are not returned for regressions on very short vectors;
  # use the sentinel value 2 in that case
  if(inherits(p.val, "try-error")) p.val <- 2
  return(list(coeff = fit$coefficients[2], p.val = p.val, nber.max = nber))
}
kwindow <- 5
mini <- 0
maxi <- 1e6
robust <- FALSE
# Slope / p-value / number of maxima per trajectory. wrap_max_reg() is
# called once per group (the original recomputed it three times) and its
# list output is renamed to the column names used below
reg.max <- nad[, setNames(wrap_max_reg(value, kwindow, mini, maxi, robust),
                          c("reg.slope", "p.val", "nber.max")),
               by = c("noise", "variable")]
ggplot(reg.max, aes(x=as.factor(noise),y=reg.slope)) + geom_boxplot(aes(fill=noise)) + theme(text = element_text(size = 45)) + ggtitle("Slopes")
ggplot(reg.max, aes(x=as.factor(noise),y=nber.max)) + geom_boxplot(aes(fill=noise)) + theme(text = element_text(size = 25)) + ggtitle("Number of maxima detected")
With robust fitting:
kwindow <- 5
mini <- 0
maxi <- 1e6
robust <- TRUE # Theil-Sen fit via mblm (TRUE, not the reassignable T)
# Same per-trajectory regression as above, but with robust fitting;
# wrap_max_reg() is called once per group instead of three times
reg.max <- nad[, setNames(wrap_max_reg(value, kwindow, mini, maxi, robust),
                          c("reg.slope", "p.val", "nber.max")),
               by = c("noise", "variable")]
## Loading required package: mblm
ggplot(reg.max, aes(x=as.factor(noise),y=reg.slope)) + geom_boxplot(aes(fill=noise)) + theme(text = element_text(size = 45)) + ggtitle("Slopes")
ggplot(reg.max, aes(x=as.factor(noise),y=nber.max)) + geom_boxplot(aes(fill=noise)) + theme(text = element_text(size = 25)) + ggtitle("Number of maxima detected")